*/
void ack_bad_irq(unsigned int irq)
{
- printk("unexpected IRQ trap at vector %02x\n", irq);
+ printk("unexpected IRQ trap at irq %02x\n", irq);
/*
* Currently unexpected vectors happen only on SMP and APIC.
* We _must_ ack these because every local APIC has only N
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs * regs)
{
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
perfc_incr(apic_timer);
raise_softirq(TIMER_SOFTIRQ);
+ set_irq_regs(old_regs);
}
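Every interrupt entry point in this series now brackets its body with set_irq_regs(regs) / set_irq_regs(old_regs), parking the interrupted context in a per-CPU slot so that code deeper in the call chain (e.g. the guest EOI path, which reads entry_vector) can reach it via get_irq_regs(). A minimal sketch of the idiom, using the helpers declared later in this series; some_interrupt is a hypothetical handler name:

/* Sketch only: the save/restore pattern used by the handlers below.
 * set_irq_regs() returns the previous value, so nested entries
 * unwind correctly. */
fastcall void some_interrupt(struct cpu_user_regs *regs)
{
    struct cpu_user_regs *old_regs = set_irq_regs(regs);

    /* ...handler body; get_irq_regs() now yields 'regs'... */

    set_irq_regs(old_regs);
}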
/*
fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs)
{
unsigned long v;
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
irq_enter();
/*
printk(KERN_INFO "spurious APIC interrupt on CPU#%d, should never happen.\n",
smp_processor_id());
irq_exit();
+ set_irq_regs(old_regs);
}
/*
fastcall void smp_error_interrupt(struct cpu_user_regs *regs)
{
unsigned long v, v1;
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
irq_enter();
/* First tickle the hardware, only then report what went on. -- REW */
printk (KERN_DEBUG "APIC error on CPU%d: %02lx(%02lx)\n",
smp_processor_id(), v , v1);
irq_exit();
+ set_irq_regs(old_regs);
}
/*
fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs)
{
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
hvm_do_pmu_interrupt(regs);
+ set_irq_regs(old_regs);
}
/*
fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs)
{
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
irq_enter();
vendor_thermal_interrupt(regs);
irq_exit();
+ set_irq_regs(old_regs);
}
/* P4/Xeon Thermal regulation detect and init */
{
mctelem_cookie_t mctc;
struct mca_summary bs;
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
irq_enter();
mctelem_dismiss(mctc);
irq_exit();
+ set_irq_regs(old_regs);
}
void mce_intel_feature_init(struct cpuinfo_x86 *c)
}
cpumask_t target_cpus_flat(void)
+{
+ return cpu_online_map;
+}
+
+cpumask_t vector_allocation_domain_flat(int cpu)
{
return cpu_online_map;
}
unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask)
{
- return cpus_addr(cpumask)[0];
+ return cpus_addr(cpumask)[0]&0xFF;
}
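In flat logical mode each CPU's logical APIC ID is a single bit of an 8-bit destination byte, so the low word of the cpumask is already a valid multicast destination; the added `& 0xFF` merely clamps the value to the 8 bits the APIC accepts. A standalone illustration (CPU numbers hypothetical):

#include <stdio.h>

int main(void)
{
    unsigned long mask_bits = 0x05UL;            /* hypothetical: CPUs 0 and 2 */
    unsigned int apicid = mask_bits & 0xFF;      /* 8-bit flat logical dest */
    printf("logical dest = %#x\n", apicid);      /* prints 0x5 */
    return 0;
}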
-
/*
* PHYSICAL DELIVERY MODE (unicast to physical APIC IDs).
*/
cpumask_t target_cpus_phys(void)
{
- /* IRQs will get bound more accurately later. */
- return cpumask_of_cpu(0);
+ return cpu_online_map;
+}
+
+cpumask_t vector_allocation_domain_phys(int cpu)
+{
+ return cpumask_of_cpu(cpu);
}
unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask)
cpumask_t target_cpus_x2apic(void)
{
- /* Deliver interrupts only to CPU0 for now */
- return cpumask_of_cpu(0);
+ return cpu_online_map;
+}
+
+cpumask_t vector_allocation_domain_x2apic(int cpu)
+{
+ return cpumask_of_cpu(cpu);
}
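The new vector_allocation_domain hooks decide how many CPUs one vector assignment occupies. Flat logical mode keeps a single shared vector space (the whole online map), while physical and x2APIC modes give each CPU a private space, so the same vector number can serve different IRQs on different CPUs. A rough capacity illustration, assuming the dynamic range 0x20-0xdf defined later in irq_vectors.h:

#include <stdio.h>

int main(void)
{
    int nr_dynamic = 0xdf - 0x20 + 1;  /* 192 dynamic vectors */
    int cpus = 8;                      /* hypothetical box */

    printf("flat (shared) domain: %d assignable vectors\n", nr_dynamic);
    printf("per-cpu domains: up to %d\n", nr_dynamic * cpus);
    return 0;
}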
unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask)
static void hpet_msi_ack(unsigned int irq)
{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
ack_APIC_irq();
}
{
struct msi_msg msg;
unsigned int dest;
- cpumask_t tmp;
- int vector = irq_to_vector(irq);
-
- cpus_and(tmp, mask, cpu_online_map);
- if ( cpus_empty(tmp) )
- mask = TARGET_CPUS;
-
- dest = cpu_mask_to_apicid(mask);
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_cfg *cfg = desc->chip_data;
- hpet_msi_read(vector, &msg);
+ dest = set_desc_affinity(desc, mask);
+ if (dest == BAD_APICID)
+ return;
+ hpet_msi_read(irq, &msg);
msg.data &= ~MSI_DATA_VECTOR_MASK;
- msg.data |= MSI_DATA_VECTOR(vector);
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
- hpet_msi_write(vector, &msg);
- irq_desc[irq].affinity = mask;
+ hpet_msi_write(irq, &msg);
}
/*
asmlinkage void do_IRQ(struct cpu_user_regs *);
fastcall void smp_apic_timer_interrupt(struct cpu_user_regs *);
- fastcall void smp_event_check_interrupt(void);
+ fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs);
fastcall void smp_invalidate_interrupt(void);
- fastcall void smp_call_function_interrupt(void);
+ fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs);
fastcall void smp_spurious_interrupt(struct cpu_user_regs *regs);
fastcall void smp_error_interrupt(struct cpu_user_regs *regs);
fastcall void smp_pmu_apic_interrupt(struct cpu_user_regs *regs);
fastcall void smp_cmci_interrupt(struct cpu_user_regs *regs);
+ fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs);
#ifdef CONFIG_X86_MCE_THERMAL
fastcall void smp_thermal_interrupt(struct cpu_user_regs *regs);
#endif
switch ( vector )
{
+ case IRQ_MOVE_CLEANUP_VECTOR:
+ smp_irq_move_cleanup_interrupt(regs);
+ break;
case LOCAL_TIMER_VECTOR:
smp_apic_timer_interrupt(regs);
break;
case EVENT_CHECK_VECTOR:
- smp_event_check_interrupt();
+ smp_event_check_interrupt(regs);
break;
case INVALIDATE_TLB_VECTOR:
smp_invalidate_interrupt();
break;
case CALL_FUNCTION_VECTOR:
- smp_call_function_interrupt();
+ smp_call_function_interrupt(regs);
break;
case SPURIOUS_APIC_VECTOR:
smp_spurious_interrupt(regs);
* is no hardware IRQ pin equivalent for them, they are triggered
* through the ICC by us (IPIs)
*/
+BUILD_SMP_INTERRUPT(irq_move_cleanup_interrupt,IRQ_MOVE_CLEANUP_VECTOR)
BUILD_SMP_INTERRUPT(event_check_interrupt,EVENT_CHECK_VECTOR)
BUILD_SMP_INTERRUPT(invalidate_interrupt,INVALIDATE_TLB_VECTOR)
BUILD_SMP_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
void __init init_IRQ(void)
{
- int i, vector;
+ int vector, irq, cpu = smp_processor_id();
init_bsp_APIC();
set_intr_gate(vector, interrupt[vector]);
}
- for ( i = 0; i < 16; i++ )
- {
- vector_irq[LEGACY_VECTOR(i)] = i;
- irq_desc[i].handler = &i8259A_irq_type;
+ for (irq = 0; irq < 16; irq++) {
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_cfg *cfg = desc->chip_data;
+
+ desc->handler = &i8259A_irq_type;
+ per_cpu(vector_irq, cpu)[FIRST_LEGACY_VECTOR + irq] = irq;
+ cfg->domain = cpumask_of_cpu(cpu);
+ cfg->vector = FIRST_LEGACY_VECTOR + irq;
}
-
- /* Never allocate the hypercall vector or Linux/BSD fast-trap vector. */
- vector_irq[HYPERCALL_VECTOR] = NEVER_ASSIGN_IRQ;
- vector_irq[0x80] = NEVER_ASSIGN_IRQ;
+
+ per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR] = 0;
apic_intr_init();
#include <xen/pci.h>
#include <xen/pci_regs.h>
#include <xen/keyhandler.h>
-#include <asm/io.h>
#include <asm/mc146818rtc.h>
#include <asm/smp.h>
#include <asm/desc.h>
int apic, pin;
unsigned int next;
} *irq_2_pin;
+
+static int *pin_irq_map;
+
static unsigned int irq_2_pin_free_entry;
+/* Use an array to record the pin-to-IRQ mapping. */
+static int get_irq_from_apic_pin(int apic, int pin)
+{
+ int i, pin_base = 0;
+
+ ASSERT(apic < nr_ioapics);
+
+ for (i = 0; i < apic; i++)
+ pin_base += nr_ioapic_registers[i];
+
+ return pin_irq_map[pin_base + pin];
+}
+
+static void set_irq_to_apic_pin(int apic, int pin, int irq)
+{
+ int i, pin_base = 0;
+
+ ASSERT(apic < nr_ioapics);
+
+ for (i = 0; i < apic; i++)
+ pin_base += nr_ioapic_registers[i];
+
+ pin_irq_map[pin_base + pin] = irq;
+}
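get_irq_from_apic_pin() and set_irq_to_apic_pin() linearise an (apic, pin) pair by summing the pin counts of all preceding IO-APICs. A standalone check of the arithmetic (pin counts hypothetical):

#include <stdio.h>

int main(void)
{
    int nr_ioapic_registers[] = { 24, 24, 32 };  /* pins per IO-APIC */
    int apic = 2, pin = 5, i, pin_base = 0;

    for (i = 0; i < apic; i++)
        pin_base += nr_ioapic_registers[i];
    printf("flat index = %d\n", pin_base + pin); /* 24 + 24 + 5 = 53 */
    return 0;
}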
+
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
BUG_ON((entry->apic == apic) && (entry->pin == pin));
entry = irq_2_pin + entry->next;
}
-
+
BUG_ON((entry->apic == apic) && (entry->pin == pin));
if (entry->pin != -1) {
}
entry->apic = apic;
entry->pin = pin;
+
+ set_irq_to_apic_pin(apic, pin, irq);
}
static void remove_pin_at_irq(unsigned int irq, int apic, int pin)
entry->next = irq_2_pin_free_entry;
irq_2_pin_free_entry = entry - irq_2_pin;
}
+
+ set_irq_to_apic_pin(apic, pin, -1);
}
/*
* Reroute an IRQ to a different pin.
*/
static void __init replace_pin_at_irq(unsigned int irq,
- int oldapic, int oldpin,
- int newapic, int newpin)
+ int oldapic, int oldpin,
+ int newapic, int newpin)
{
struct irq_pin_list *entry = irq_2_pin + irq;
{
struct IO_APIC_route_entry entry;
unsigned long flags;
-
+
/* Check delivery_mode to be sure we're not clearing an SMI pin */
spin_lock_irqsave(&ioapic_lock, flags);
*(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
}
#ifdef CONFIG_SMP
-static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
+fastcall void smp_irq_move_cleanup_interrupt(struct cpu_user_regs *regs)
+{
+ unsigned vector, me;
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
+
+ ack_APIC_irq();
+ irq_enter();
+
+ me = smp_processor_id();
+ for (vector = FIRST_DYNAMIC_VECTOR; vector < NR_VECTORS; vector++) {
+ unsigned int irq;
+ unsigned int irr;
+ struct irq_desc *desc;
+ struct irq_cfg *cfg;
+ irq = __get_cpu_var(vector_irq)[vector];
+
+ if (irq == -1)
+ continue;
+
+ desc = irq_to_desc(irq);
+ if (!desc)
+ continue;
+
+ cfg = desc->chip_data;
+ spin_lock(&desc->lock);
+ if (!cfg->move_cleanup_count)
+ goto unlock;
+
+ if (vector == cfg->vector && cpu_isset(me, cfg->domain))
+ goto unlock;
+
+ irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+ /*
+ * Check if the vector that needs to be cleaned up is
+ * registered in this CPU's IRR. If so, this is not the
+ * best time to clean it up. Clean it up on the next
+ * attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
+ * to ourselves.
+ */
+ if (irr & (1 << (vector % 32))) {
+ genapic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+ goto unlock;
+ }
+ __get_cpu_var(vector_irq)[vector] = -1;
+ cfg->move_cleanup_count--;
+unlock:
+ spin_unlock(&desc->lock);
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+static void send_cleanup_vector(struct irq_cfg *cfg)
+{
+ cpumask_t cleanup_mask;
+
+ cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
+ cfg->move_cleanup_count = cpus_weight(cleanup_mask);
+ genapic->send_IPI_mask(&cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+
+ cfg->move_in_progress = 0;
+}
+
+void irq_complete_move(struct irq_desc **descp)
+{
+ struct irq_desc *desc = *descp;
+ struct irq_cfg *cfg = desc->chip_data;
+ unsigned vector, me;
+
+ if (likely(!cfg->move_in_progress))
+ return;
+
+ vector = get_irq_regs()->entry_vector;
+ me = smp_processor_id();
+
+ if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+ send_cleanup_vector(cfg);
+}
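Taken together, the three functions above implement a staged vector migration; the sequence, summarised as a sketch (not patch code):

/*
 * set_affinity() -> __assign_irq_vector():
 *     cfg->old_domain = cfg->domain;  new vector/domain installed;
 *     cfg->move_in_progress = 1;
 *
 * first interrupt arriving on the new vector, on a new-domain CPU:
 *     irq_complete_move() -> send_cleanup_vector():
 *         IPI every old-domain CPU with IRQ_MOVE_CLEANUP_VECTOR
 *
 * smp_irq_move_cleanup_interrupt() on each old-domain CPU:
 *     vector still set in the local IRR?  re-IPI self, retry later;
 *     otherwise  vector_irq[old vector] = -1, move_cleanup_count--.
 */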
+
+unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask)
+{
+ struct irq_cfg *cfg;
+ unsigned int irq;
+ int ret;
+ cpumask_t dest_mask;
+
+ if (!cpus_intersects(mask, cpu_online_map))
+ return BAD_APICID;
+
+ irq = desc->irq;
+ cfg = desc->chip_data;
+
+ lock_vector_lock();
+ ret = __assign_irq_vector(irq, cfg, mask);
+ unlock_vector_lock();
+
+ if (ret < 0)
+ return BAD_APICID;
+
+ cpus_copy(desc->affinity, mask);
+ cpus_and(dest_mask, desc->affinity, cfg->domain);
+
+ return cpu_mask_to_apicid(dest_mask);
+}
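set_desc_affinity() is the shared front end for the per-chip set_affinity callbacks in this series: it validates the mask, re-runs vector assignment under the vector lock, records the new affinity, and hands back the APIC destination ID (or BAD_APICID on failure). A usage sketch, with a hypothetical callback name, matching the HPET, MSI and IOMMU hunks:

/* Sketch only; foo_set_affinity is a hypothetical chip callback. */
static void foo_set_affinity(unsigned int irq, cpumask_t mask)
{
    struct irq_desc *desc = irq_to_desc(irq);
    struct irq_cfg *cfg = desc->chip_data;
    unsigned int dest = set_desc_affinity(desc, mask);

    if (dest == BAD_APICID)
        return;  /* no online CPU in the mask, or no free vector */

    /* ...reprogram the source with cfg->vector and dest... */
}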
+
+static void
+set_ioapic_affinity_irq_desc(struct irq_desc *desc,
+ const struct cpumask mask)
{
unsigned long flags;
- int pin;
- struct irq_pin_list *entry = irq_2_pin + irq;
- unsigned int apicid_value;
+ unsigned int dest;
+ int pin, irq;
+ struct irq_cfg *cfg;
+ struct irq_pin_list *entry;
- cpus_and(cpumask, cpumask, cpu_online_map);
- if (cpus_empty(cpumask))
- cpumask = TARGET_CPUS;
+ irq = desc->irq;
+ cfg = desc->chip_data;
- apicid_value = cpu_mask_to_apicid(cpumask);
- /* Prepare to do the io_apic_write */
- apicid_value = apicid_value << 24;
spin_lock_irqsave(&ioapic_lock, flags);
- for (;;) {
- pin = entry->pin;
- if (pin == -1)
- break;
- io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
- if (!entry->next)
- break;
- entry = irq_2_pin + entry->next;
+ dest = set_desc_affinity(desc, mask);
+ if (dest != BAD_APICID) {
+ /* Only the high 8 bits are valid. */
+ dest = SET_APIC_LOGICAL_ID(dest);
+ entry = irq_2_pin + irq;
+ for (;;) {
+ unsigned int data;
+ pin = entry->pin;
+ if (pin == -1)
+ break;
+
+ io_apic_write(entry->apic, 0x10 + 1 + pin*2, dest);
+ data = io_apic_read(entry->apic, 0x10 + pin*2);
+ data &= ~IO_APIC_REDIR_VECTOR_MASK;
+ data |= cfg->vector & 0xFF;
+ io_apic_modify(entry->apic, 0x10 + pin*2, data);
+
+ if (!entry->next)
+ break;
+ entry = irq_2_pin + entry->next;
+ }
}
- set_irq_info(irq, cpumask);
spin_unlock_irqrestore(&ioapic_lock, flags);
+
+}
+
+static void
+set_ioapic_affinity_irq(unsigned int irq, const struct cpumask mask)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ set_ioapic_affinity_irq_desc(desc, mask);
}
#endif /* CONFIG_SMP */
void /*__init*/ setup_ioapic_dest(void)
{
int pin, ioapic, irq, irq_entry;
+ struct irq_cfg *cfg;
if (skip_ioapic_setup == 1)
return;
if (irq_entry == -1)
continue;
irq = pin_2_irq(irq_entry, ioapic, pin);
- set_ioapic_affinity_irq(irq, TARGET_CPUS);
+ cfg = irq_cfg(irq);
+ BUG_ON(cpus_empty(cfg->domain));
+ set_ioapic_affinity_irq(irq, cfg->domain);
}
}
* EISA conforming in the MP table, that means its trigger type must
* be read in from the ELCR */
-#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
+#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
#define default_EISA_polarity(idx) (0)
/* ISA interrupts are always polarity zero edge triggered,
struct IO_APIC_route_entry entry;
int apic, pin, idx, irq, first_notcon = 1, vector;
unsigned long flags;
+ struct irq_cfg *cfg;
apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
for (apic = 0; apic < nr_ioapics; apic++) {
- for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
/*
* add it to the IO-APIC irq-routing table:
entry.delivery_mode = INT_DELIVERY_MODE;
entry.dest_mode = INT_DEST_MODE;
- entry.mask = 0; /* enable IRQ */
- entry.dest.logical.logical_dest =
- cpu_mask_to_apicid(TARGET_CPUS);
+ entry.mask = 0; /* enable IRQ */
idx = find_irq_entry(apic,pin,mp_INT);
if (idx == -1) {
if (IO_APIC_IRQ(irq)) {
vector = assign_irq_vector(irq);
+ BUG_ON(vector < 0);
entry.vector = vector;
ioapic_register_intr(irq, IOAPIC_AUTO);
if (!apic && (irq < 16))
disable_8259A_irq(irq);
}
+ cfg = irq_cfg(irq);
+ entry.dest.logical.logical_dest =
+ cpu_mask_to_apicid(cfg->domain);
spin_lock_irqsave(&ioapic_lock, flags);
io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
/* Initialise dynamic irq_2_pin free list. */
irq_2_pin = xmalloc_array(struct irq_pin_list, PIN_MAP_SIZE);
- memset(irq_2_pin, 0, nr_irqs_gsi * sizeof(*irq_2_pin));
+ memset(irq_2_pin, 0, PIN_MAP_SIZE * sizeof(*irq_2_pin));
+ pin_irq_map = xmalloc_array(int, nr_irqs_gsi);
+ memset(pin_irq_map, 0, nr_irqs_gsi * sizeof(int));
+
for (i = 0; i < PIN_MAP_SIZE; i++)
irq_2_pin[i].pin = -1;
for (i = irq_2_pin_free_entry = nr_irqs_gsi; i < PIN_MAP_SIZE; i++)
irq_2_pin[i].next = i + 1;
+ for (i = 0; i < nr_irqs_gsi; i++)
+ pin_irq_map[i] = -1;
for(apic = 0; apic < nr_ioapics; apic++) {
int pin;
*/
static void ack_edge_ioapic_irq(unsigned int irq)
{
- if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
+
+ if ((desc->status & (IRQ_PENDING | IRQ_DISABLED))
== (IRQ_PENDING | IRQ_DISABLED))
mask_IO_APIC_irq(irq);
ack_APIC_irq();
{
unsigned long v;
int i;
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
if ( ioapic_ack_new )
return;
{
struct irq_desc *desc = irq_to_desc(irq);
+ irq_complete_move(&desc);
+
if ( msi_maskable_irq(desc->msi_desc) )
ack_APIC_irq(); /* ACKTYPE_NONE */
}
static inline void check_timer(void)
{
int apic1, pin1, apic2, pin2;
- int vector;
+ int vector, ret;
unsigned long flags;
local_irq_save(flags);
* get/set the timer IRQ vector:
*/
disable_8259A_irq(0);
- vector = assign_irq_vector(0);
+ vector = FIRST_HIPRIORITY_VECTOR;
+ clear_irq_vector(0);
+ if ((ret = bind_irq_vector(0, vector, (cpumask_t)CPU_MASK_ALL)))
+ printk(KERN_ERR"..IRQ0 is not set correctly with ioapic!!!, err:%d\n", ret);
+
irq_desc[0].depth = 0;
irq_desc[0].status &= ~IRQ_DISABLED;
irq_desc[0].handler = &ioapic_edge_type;
{
struct IO_APIC_route_entry entry;
unsigned long flags;
+ int vector;
if (!IO_APIC_IRQ(irq)) {
printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
if (irq >= 16)
add_pin_to_irq(irq, ioapic, pin);
- entry.vector = assign_irq_vector(irq);
+ vector = assign_irq_vector(irq);
+ if (vector < 0)
+ return vector;
+ entry.vector = vector;
apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
"(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
/* Write first half from guest; second half is target info. */
*(u32 *)&new_rte = val;
- new_rte.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
/*
* What about weird destination types?
}
if ( old_rte.vector >= FIRST_DYNAMIC_VECTOR )
- old_irq = vector_irq[old_rte.vector];
+ old_irq = get_irq_from_apic_pin(apic, pin);
- if ( new_rte.vector >= FIRST_DYNAMIC_VECTOR )
- new_irq = vector_irq[new_rte.vector];
+ /* FIXME: dirty hack to support per-cpu vector. */
+ new_irq = new_rte.vector;
if ( (old_irq != new_irq) && (old_irq >= 0) && IO_APIC_IRQ(old_irq) )
{
/* Mask iff level triggered. */
new_rte.mask = new_rte.trigger;
+ /* Set the vector field to the real vector! */
+ new_rte.vector = irq_cfg[new_irq].vector;
}
else if ( !new_rte.mask )
{
new_rte.mask = 1;
}
+ new_rte.dest.logical.logical_dest =
+ cpu_mask_to_apicid(irq_cfg[new_irq].domain);
io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&new_rte) + 0));
io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&new_rte) + 1));
printk("vector=%u, delivery_mode=%u, dest_mode=%s, "
"delivery_status=%d, polarity=%d, irr=%d, "
- "trigger=%s, mask=%d\n",
+ "trigger=%s, mask=%d, dest_id:%d\n",
rte.vector, rte.delivery_mode,
rte.dest_mode ? "logical" : "physical",
rte.delivery_status, rte.polarity, rte.irr,
- rte.trigger ? "level" : "edge", rte.mask);
+ rte.trigger ? "level" : "edge", rte.mask,
+ rte.dest.logical.logical_dest);
if ( entry->next == 0 )
break;
#include <asm/msi.h>
#include <asm/current.h>
#include <asm/flushtlb.h>
+#include <asm/mach-generic/mach_apic.h>
#include <public/physdev.h>
/* opt_noirqbalance: If true, software IRQ balancing/affinity is disabled. */
#define IRQ_USED (1)
#define IRQ_RSVD (2)
+#define IRQ_VECTOR_UNASSIGNED (0)
+
+DECLARE_BITMAP(used_vectors, NR_VECTORS);
+
+struct irq_cfg __read_mostly *irq_cfg = NULL;
+
static struct timer *irq_guest_eoi_timer;
static DEFINE_SPINLOCK(vector_lock);
-int vector_irq[NR_VECTORS] __read_mostly = {
- [0 ... NR_VECTORS - 1] = FREE_TO_ASSIGN_IRQ
+
+DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
+ [0 ... NR_VECTORS - 1] = -1
};
+DEFINE_PER_CPU(struct cpu_user_regs *, __irq_regs);
+
+void lock_vector_lock(void)
+{
+ /* Used to ensure that the online set of CPUs does not
+ * change during assign_irq_vector().
+ */
+ spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+ spin_unlock(&vector_lock);
+}
+
+static int __bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+ cpumask_t mask;
+ int cpu;
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ BUG_ON((unsigned)irq >= nr_irqs);
+ BUG_ON((unsigned)vector >= NR_VECTORS);
+
+ cpus_and(mask, domain, cpu_online_map);
+ if (cpus_empty(mask))
+ return -EINVAL;
+ if ((cfg->vector == vector) && cpus_equal(cfg->domain, domain))
+ return 0;
+ if (cfg->vector != IRQ_VECTOR_UNASSIGNED)
+ return -EBUSY;
+ for_each_cpu_mask(cpu, mask)
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ cfg->vector = vector;
+ cfg->domain = domain;
+ irq_status[irq] = IRQ_USED;
+ if (IO_APIC_IRQ(irq))
+ irq_vector[irq] = vector;
+ return 0;
+}
+
+int bind_irq_vector(int irq, int vector, cpumask_t domain)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&vector_lock, flags);
+ ret = __bind_irq_vector(irq, vector, domain);
+ spin_unlock_irqrestore(&vector_lock, flags);
+ return ret;
+}
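bind_irq_vector() is the locked wrapper for IRQs that must sit on one fixed vector; the check_timer() hunk earlier pins IRQ0 this way before the IO-APIC path is trusted. Usage sketch:

/* Sketch: pin legacy IRQ0 to the fixed high-priority vector on all CPUs. */
clear_irq_vector(0);
if (bind_irq_vector(0, FIRST_HIPRIORITY_VECTOR, (cpumask_t)CPU_MASK_ALL))
    printk("IRQ0 vector binding failed\n");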
+
static inline int find_unassigned_irq(void)
{
int irq;
irq = find_unassigned_irq();
if (irq < 0)
goto out;
- ret = __assign_irq_vector(irq);
+ ret = __assign_irq_vector(irq, irq_cfg(irq), TARGET_CPUS);
if (ret < 0)
irq = ret;
out:
void dynamic_irq_cleanup(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
- struct irqaction *action;
unsigned long flags;
+ struct irqaction *action;
spin_lock_irqsave(&desc->lock, flags);
desc->status |= IRQ_DISABLED;
xfree(action);
}
+static void init_one_irq_status(int irq);
+
static void __clear_irq_vector(int irq)
{
- int vector = irq_vector[irq];
- vector_irq[vector] = FREE_TO_ASSIGN_IRQ;
- irq_vector[irq] = 0;
- irq_status[irq] = IRQ_UNUSED;
+ int cpu, vector;
+ cpumask_t tmp_mask;
+ struct irq_cfg *cfg = irq_cfg(irq);
+
+ BUG_ON(!cfg->vector);
+
+ vector = cfg->vector;
+ cpus_and(tmp_mask, cfg->domain, cpu_online_map);
+
+ for_each_cpu_mask(cpu, tmp_mask)
+ per_cpu(vector_irq, cpu)[vector] = -1;
+
+ cfg->vector = IRQ_VECTOR_UNASSIGNED;
+ cpus_clear(cfg->domain);
+ init_one_irq_status(irq);
+
+ if (likely(!cfg->move_in_progress))
+ return;
+ for_each_cpu_mask(cpu, tmp_mask) {
+ for (vector = FIRST_DYNAMIC_VECTOR; vector <= LAST_DYNAMIC_VECTOR;
+ vector++) {
+ if (per_cpu(vector_irq, cpu)[vector] != irq)
+ continue;
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ break;
+ }
+ }
+
+ cfg->move_in_progress = 0;
}
void clear_irq_vector(int irq)
void destroy_irq(unsigned int irq)
{
+ BUG_ON(!MSI_IRQ(irq));
dynamic_irq_cleanup(irq);
clear_irq_vector(irq);
}
int irq_to_vector(int irq)
{
int vector = -1;
+ struct irq_cfg *cfg;
BUG_ON(irq >= nr_irqs || irq < 0);
- if (IO_APIC_IRQ(irq) || MSI_IRQ(irq))
+ if (IO_APIC_IRQ(irq))
vector = irq_vector[irq];
- else
+ else if (MSI_IRQ(irq)) {
+ cfg = irq_cfg(irq);
+ vector = cfg->vector;
+ } else
vector = LEGACY_VECTOR(irq);
return vector;
static void init_one_irq_desc(struct irq_desc *desc)
{
- desc->status = IRQ_DISABLED;
- desc->handler = &no_irq_type;
- desc->action = NULL;
- desc->depth = 1;
- desc->msi_desc = NULL;
- spin_lock_init(&desc->lock);
- cpus_setall(desc->affinity);
+ desc->status = IRQ_DISABLED;
+ desc->handler = &no_irq_type;
+ desc->action = NULL;
+ desc->depth = 1;
+ desc->msi_desc = NULL;
+ spin_lock_init(&desc->lock);
+ cpus_setall(desc->affinity);
}
static void init_one_irq_status(int irq)
irq_status[irq] = IRQ_UNUSED;
}
+static void init_one_irq_cfg(struct irq_cfg *cfg)
+{
+ cfg->vector = IRQ_VECTOR_UNASSIGNED;
+ cpus_clear(cfg->domain);
+ cpus_clear(cfg->old_domain);
+}
+
int init_irq_data(void)
{
struct irq_desc *desc;
+ struct irq_cfg *cfg;
int irq;
irq_desc = xmalloc_array(struct irq_desc, nr_irqs);
+ irq_cfg = xmalloc_array(struct irq_cfg, nr_irqs);
irq_status = xmalloc_array(int, nr_irqs);
irq_guest_eoi_timer = xmalloc_array(struct timer, nr_irqs);
- irq_vector = xmalloc_array(u8, nr_irqs);
+ irq_vector = xmalloc_array(u8, nr_irqs_gsi);
- if (!irq_desc || !irq_status ||! irq_vector || !irq_guest_eoi_timer)
- return -1;
+ if (!irq_desc || !irq_cfg || !irq_status || !irq_vector ||
+ !irq_guest_eoi_timer)
+ return -ENOMEM;
memset(irq_desc, 0, nr_irqs * sizeof(*irq_desc));
+ memset(irq_cfg, 0, nr_irqs * sizeof(*irq_cfg));
memset(irq_status, 0, nr_irqs * sizeof(*irq_status));
- memset(irq_vector, 0, nr_irqs * sizeof(*irq_vector));
+ memset(irq_vector, 0, nr_irqs_gsi * sizeof(*irq_vector));
memset(irq_guest_eoi_timer, 0, nr_irqs * sizeof(*irq_guest_eoi_timer));
for (irq = 0; irq < nr_irqs; irq++) {
desc = irq_to_desc(irq);
+ cfg = irq_cfg(irq);
desc->irq = irq;
+ desc->chip_data = cfg;
init_one_irq_desc(desc);
+ init_one_irq_cfg(cfg);
init_one_irq_status(irq);
}
+ /* Never allocate the hypercall vector or Linux/BSD fast-trap vector. */
+ set_bit(LEGACY_SYSCALL_VECTOR, used_vectors);
+ set_bit(HYPERCALL_VECTOR, used_vectors);
+
+ /* IRQ_MOVE_CLEANUP_VECTOR is used for cleaning up vectors. */
+ set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
+
return 0;
}
atomic_t irq_err_count;
-int __assign_irq_vector(int irq)
+int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask)
{
- static unsigned current_vector = FIRST_DYNAMIC_VECTOR;
- unsigned vector;
+ /*
+ * NOTE! The local APIC isn't very good at handling
+ * multiple interrupts at the same interrupt level.
+ * As the interrupt level is determined by taking the
+ * vector number and shifting that right by 4, we
+ * want to spread these out a bit so that they don't
+ * all fall in the same interrupt level.
+ *
+ * Also, we've got to be careful not to trash gate
+ * 0x80, because int 0x80 is hm, kind of importantish. ;)
+ */
+ static int current_vector = FIRST_DYNAMIC_VECTOR, current_offset = 0;
+ unsigned int old_vector;
+ int cpu, err;
+ cpumask_t tmp_mask;
- BUG_ON(irq >= nr_irqs || irq < 0);
+ if (cfg->move_in_progress || cfg->move_cleanup_count)
+ return -EBUSY;
- if ((irq_to_vector(irq) > 0))
- return irq_to_vector(irq);
+ old_vector = irq_to_vector(irq);
+ if (old_vector) {
+ cpus_and(tmp_mask, mask, cpu_online_map);
+ cpus_and(tmp_mask, cfg->domain, tmp_mask);
+ if (!cpus_empty(tmp_mask)) {
+ cfg->vector = old_vector;
+ return 0;
+ }
+ }
- vector = current_vector;
- while (vector_irq[vector] != FREE_TO_ASSIGN_IRQ) {
- vector += 8;
- if (vector > LAST_DYNAMIC_VECTOR)
- vector = FIRST_DYNAMIC_VECTOR + ((vector + 1) & 7);
+ /* Only try and allocate irqs on cpus that are present */
+ cpus_and(mask, mask, cpu_online_map);
- if (vector == current_vector)
- return -ENOSPC;
- }
+ err = -ENOSPC;
+ for_each_cpu_mask(cpu, mask) {
+ int new_cpu;
+ int vector, offset;
- current_vector = vector;
- vector_irq[vector] = irq;
- irq_vector[irq] = vector;
- irq_status[irq] = IRQ_USED;
+ tmp_mask = vector_allocation_domain(cpu);
+ cpus_and(tmp_mask, tmp_mask, cpu_online_map);
- return vector;
+ vector = current_vector;
+ offset = current_offset;
+next:
+ vector += 8;
+ if (vector > LAST_DYNAMIC_VECTOR) {
+ /* If out of vectors on large boxen, must share them. */
+ offset = (offset + 1) % 8;
+ vector = FIRST_DYNAMIC_VECTOR + offset;
+ }
+ if (unlikely(current_vector == vector))
+ continue;
+
+ if (test_bit(vector, used_vectors))
+ goto next;
+
+ for_each_cpu_mask(new_cpu, tmp_mask)
+ if (per_cpu(vector_irq, new_cpu)[vector] != -1)
+ goto next;
+ /* Found one! */
+ current_vector = vector;
+ current_offset = offset;
+ if (old_vector) {
+ cfg->move_in_progress = 1;
+ cpus_copy(cfg->old_domain, cfg->domain);
+ }
+ for_each_cpu_mask(new_cpu, tmp_mask)
+ per_cpu(vector_irq, new_cpu)[vector] = irq;
+ cfg->vector = vector;
+ cpus_copy(cfg->domain, tmp_mask);
+
+ irq_status[irq] = IRQ_USED;
+ if (IO_APIC_IRQ(irq))
+ irq_vector[irq] = vector;
+ err = 0;
+ break;
+ }
+ return err;
}
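The search stride of 8 in __assign_irq_vector() spreads consecutive allocations across APIC priority classes (a vector's class is vector >> 4, so each class holds 16 vectors); when the range is exhausted, the offset rotates so the next pass lands on different slots. A standalone trace of the first few candidates, using the dynamic range from irq_vectors.h:

#include <stdio.h>

int main(void)
{
    int vector = 0x20, offset = 0, i;

    for (i = 0; i < 6; i++) {
        vector += 8;
        if (vector > 0xdf) {            /* wrap and rotate the offset */
            offset = (offset + 1) % 8;
            vector = 0x20 + offset;
        }
        printf("candidate %#x, priority class %d\n", vector, vector >> 4);
    }
    return 0;
}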
int assign_irq_vector(int irq)
{
int ret;
unsigned long flags;
+ struct irq_cfg *cfg = &irq_cfg[irq];
+ BUG_ON(irq >= nr_irqs || irq < 0);
+
spin_lock_irqsave(&vector_lock, flags);
- ret = __assign_irq_vector(irq);
+ ret = __assign_irq_vector(irq, cfg, TARGET_CPUS);
+ if (!ret)
+ ret = cfg->vector;
spin_unlock_irqrestore(&vector_lock, flags);
-
return ret;
}
+/*
+ * Initialize vector_irq on a new cpu. This function must be called
+ * with vector_lock held.
+ */
+void __setup_vector_irq(int cpu)
+{
+ int irq, vector;
+ struct irq_cfg *cfg;
+
+ /* Clear vector_irq */
+ for (vector = 0; vector < NR_VECTORS; ++vector)
+ per_cpu(vector_irq, cpu)[vector] = -1;
+ /* Mark the inuse vectors */
+ for (irq = 0; irq < nr_irqs; ++irq) {
+ cfg = irq_cfg(irq);
+ if (!cpu_isset(cpu, cfg->domain))
+ continue;
+ vector = irq_to_vector(irq);
+ per_cpu(vector_irq, cpu)[vector] = irq;
+ }
+}
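__setup_vector_irq() rebuilds a booting CPU's vector_irq[] from the global irq_cfg domains; the smpboot hunk later in this series calls it under the vector lock, before the CPU becomes visible in cpu_online_map:

/* Usage sketch, mirroring the smpboot change below. */
lock_vector_lock();
__setup_vector_irq(smp_processor_id());
cpu_set(smp_processor_id(), cpu_online_map);
unlock_vector_lock();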
asmlinkage void do_IRQ(struct cpu_user_regs *regs)
{
struct irqaction *action;
uint32_t tsc_in;
- unsigned int vector = regs->entry_vector;
- int irq = vector_irq[vector];
struct irq_desc *desc;
+ unsigned int vector = regs->entry_vector;
+ int irq = __get_cpu_var(vector_irq[vector]);
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
perfc_incr(irqs);
ack_APIC_irq();
printk("%s: %d.%d No irq handler for vector (irq %d)\n",
__func__, smp_processor_id(), vector, irq);
+ set_irq_regs(old_regs);
return;
}
TRACE_3D(TRC_TRACE_IRQ, irq, tsc_in, get_cycles());
irq_exit();
spin_unlock(&desc->lock);
+ set_irq_regs(old_regs);
return;
}
out:
desc->handler->end(irq);
spin_unlock(&desc->lock);
+ set_irq_regs(old_regs);
}
int request_irq(unsigned int irq,
#define ACKTYPE_UNMASK 1 /* Unmask PIC hardware (from any CPU) */
#define ACKTYPE_EOI 2 /* EOI on the CPU that was interrupted */
cpumask_t cpu_eoi_map; /* CPUs that need to EOI this interrupt */
+ u8 eoi_vector; /* vector awaiting the EOI */
struct domain *guest[IRQ_MAX_GUESTS];
} irq_guest_action_t;
struct domain *d;
int i, sp, already_pending = 0;
struct pending_eoi *peoi = this_cpu(pending_eoi);
- int vector = irq_to_vector(irq);
+ int vector = get_irq_regs()->entry_vector;
if ( unlikely(action->nr_guests == 0) )
{
peoi[sp].ready = 0;
pending_eoi_sp(peoi) = sp+1;
cpu_set(smp_processor_id(), action->cpu_eoi_map);
+ action->eoi_vector = vector;
}
for ( i = 0; i < action->nr_guests; i++ )
while ( (--sp >= 0) && peoi[sp].ready )
{
- irq = vector_irq[peoi[sp].vector];
+ irq = __get_cpu_var(vector_irq[peoi[sp].vector]);
+ ASSERT(irq > 0);
desc = irq_to_desc(irq);
spin_lock(&desc->lock);
desc->handler->end(irq);
return;
sp = pending_eoi_sp(peoi);
+
do {
ASSERT(sp > 0);
- } while ( peoi[--sp].vector != irq_to_vector(irq) );
+ } while ( peoi[--sp].vector != action->eoi_vector );
ASSERT(!peoi[sp].ready);
peoi[sp].ready = 1;
}
static void dump_irqs(unsigned char key)
{
- int i, glob_irq, irq, vector;
+ int i, irq, pirq;
struct irq_desc *desc;
+ struct irq_cfg *cfg;
irq_guest_action_t *action;
struct domain *d;
unsigned long flags;
printk("Guest interrupt information:\n");
- for ( vector = 0; vector < NR_VECTORS; vector++ )
+ for ( irq = 0; irq < nr_irqs; irq++ )
{
- glob_irq = vector_to_irq(vector);
- if (glob_irq < 0)
- continue;
+ desc = irq_to_desc(irq);
+ cfg = desc->chip_data;
- desc = irq_to_desc(glob_irq);
- if ( desc == NULL || desc->handler == &no_irq_type )
+ if ( !desc->handler || desc->handler == &no_irq_type )
continue;
spin_lock_irqsave(&desc->lock, flags);
if ( !(desc->status & IRQ_GUEST) )
- printk(" Vec%3d IRQ%3d: type=%-15s status=%08x "
- "mapped, unbound\n",
- vector, glob_irq, desc->handler->typename, desc->status);
+ /* Only show the affinity of CPUs 0-31. */
+ printk(" IRQ:%4d, IRQ affinity:0x%08x, Vec:%3d type=%-15s"
+ " status=%08x mapped, unbound\n",
+ irq, *(int*)cfg->domain.bits, cfg->vector,
+ desc->handler->typename, desc->status);
else
{
action = (irq_guest_action_t *)desc->action;
- printk(" Vec%3d IRQ%3d: type=%-15s status=%08x "
- "in-flight=%d domain-list=",
- vector, glob_irq, desc->handler->typename,
- desc->status, action->in_flight);
+ printk(" IRQ:%4d, IRQ affinity:0x%08x, Vec:%3d type=%-15s "
+ "status=%08x in-flight=%d domain-list=",
+ irq, *(int*)cfg->domain.bits, cfg->vector,
+ desc->handler->typename, desc->status, action->in_flight);
for ( i = 0; i < action->nr_guests; i++ )
{
d = action->guest[i];
- irq = domain_irq_to_pirq(d, vector_irq[vector]);
+ pirq = domain_irq_to_pirq(d, irq);
printk("%u:%3d(%c%c%c%c)",
- d->domain_id, irq,
- (test_bit(d->pirq_to_evtchn[glob_irq],
+ d->domain_id, pirq,
+ (test_bit(d->pirq_to_evtchn[pirq],
&shared_info(d, evtchn_pending)) ?
'P' : '-'),
- (test_bit(d->pirq_to_evtchn[glob_irq] /
+ (test_bit(d->pirq_to_evtchn[pirq] /
BITS_PER_EVTCHN_WORD(d),
&vcpu_info(d->vcpu[0], evtchn_pending_sel)) ?
'S' : '-'),
- (test_bit(d->pirq_to_evtchn[glob_irq],
+ (test_bit(d->pirq_to_evtchn[pirq],
&shared_info(d, evtchn_mask)) ?
'M' : '-'),
- (test_bit(glob_irq, d->pirq_mask) ?
+ (test_bit(pirq, d->pirq_mask) ?
'M' : '-'));
if ( i != action->nr_guests )
printk(",");
#include <asm/mach-generic/mach_apic.h>
#include <xen/delay.h>
-void fixup_irqs(cpumask_t map)
+/* A CPU has been removed from cpu_online_map. Re-set IRQ affinities. */
+void fixup_irqs(void)
{
- unsigned int vector, sp;
+ unsigned int irq, sp;
static int warned;
+ struct irq_desc *desc;
irq_guest_action_t *action;
struct pending_eoi *peoi;
- irq_desc_t *desc;
- unsigned long flags;
-
- /* Direct all future interrupts away from this CPU. */
- for ( vector = 0; vector < NR_VECTORS; vector++ )
- {
- cpumask_t mask;
- if ( vector_to_irq(vector) == 2 )
+ for (irq = 0; irq < nr_irqs; irq++) {
+ int break_affinity = 0;
+ int set_affinity = 1;
+ cpumask_t affinity;
+ if (irq == 2)
continue;
+ desc = irq_to_desc(irq);
+ /* Interrupts are disabled at this point. */
+ spin_lock(&desc->lock);
- desc = irq_to_desc(vector_to_irq(vector));
-
- spin_lock_irqsave(&desc->lock, flags);
+ affinity = desc->affinity;
+ if (!desc->action ||
+ cpus_equal(affinity, cpu_online_map)) {
+ spin_unlock(&desc->lock);
+ continue;
+ }
- cpus_and(mask, desc->affinity, map);
- if ( any_online_cpu(mask) == NR_CPUS )
+ cpus_and(affinity, affinity, cpu_online_map);
+ if ( any_online_cpu(affinity) == NR_CPUS )
{
- printk("Breaking affinity for vector %u (irq %i)\n",
- vector, vector_to_irq(vector));
- mask = map;
+ break_affinity = 1;
+ affinity = cpu_online_map;
}
- if ( desc->handler->set_affinity )
- desc->handler->set_affinity(vector, mask);
- else if ( desc->action && !(warned++) )
- printk("Cannot set affinity for vector %u (irq %i)\n",
- vector, vector_to_irq(vector));
- spin_unlock_irqrestore(&desc->lock, flags);
+ if (desc->handler->disable)
+ desc->handler->disable(irq);
+
+ if (desc->handler->set_affinity)
+ desc->handler->set_affinity(irq, affinity);
+ else if (!(warned++))
+ set_affinity = 0;
+
+ if (desc->handler->enable)
+ desc->handler->enable(irq);
+
+ spin_unlock(&desc->lock);
+
+ if (break_affinity && set_affinity)
+ printk("Broke affinity for irq %i\n", irq);
+ else if (!set_affinity)
+ printk("Cannot set affinity for irq %i\n", irq);
}
- /* Service any interrupts that beat us in the re-direction race. */
+ /* That doesn't seem sufficient. Give it 1ms. */
local_irq_enable();
mdelay(1);
local_irq_disable();
/* Clean up cpu_eoi_map of every interrupt to exclude this CPU. */
- for ( vector = 0; vector < NR_VECTORS; vector++ )
+ for ( irq = 0; irq < nr_irqs; irq++ )
{
- if ( !(irq_desc[vector_to_irq(vector)].status & IRQ_GUEST) )
+ desc = irq_to_desc(irq);
+ if ( !(desc->status & IRQ_GUEST) )
continue;
- action = (irq_guest_action_t *)irq_desc[vector_to_irq(vector)].action;
+ action = (irq_guest_action_t *)desc->action;
cpu_clear(smp_processor_id(), action->cpu_eoi_map);
}
struct msi_msg *msg)
{
unsigned dest;
- cpumask_t tmp;
- int vector = irq_to_vector(irq);
+ cpumask_t domain;
+ struct irq_cfg *cfg = irq_cfg(irq);
+ int vector = cfg->vector;
+ domain = cfg->domain;
+
+ if ( cpus_empty( domain ) ) {
+ dprintk(XENLOG_ERR,"%s, compose msi message error!!\n", __func__);
+ return;
+ }
- tmp = TARGET_CPUS;
- if ( vector )
- {
- dest = cpu_mask_to_apicid(tmp);
+ if ( vector ) {
+ dest = cpu_mask_to_apicid(domain);
msg->address_hi = MSI_ADDR_BASE_HI;
msg->address_lo =
void set_msi_affinity(unsigned int irq, cpumask_t mask)
{
- struct msi_desc *desc = irq_desc[irq].msi_desc;
struct msi_msg msg;
unsigned int dest;
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct msi_desc *msi_desc = desc->msi_desc;
+ struct irq_cfg *cfg = desc->chip_data;
+
+ dest = set_desc_affinity(desc, mask);
+ if (dest == BAD_APICID || !msi_desc)
+ return;
+
+ ASSERT(spin_is_locked(&desc->lock));
memset(&msg, 0, sizeof(msg));
+ read_msi_msg(msi_desc, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
cpus_and(mask, mask, cpu_online_map);
if ( cpus_empty(mask) )
mask = TARGET_CPUS;
if ( !desc )
return;
- ASSERT(spin_is_locked(&irq_desc[irq].lock));
- read_msi_msg(desc, &msg);
+ ASSERT(spin_is_locked(&desc->lock));
+ read_msi_msg(msi_desc, &msg);
+
+ msg.data &= ~MSI_DATA_VECTOR_MASK;
+ msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID(dest);
- write_msi_msg(desc, &msg);
+ write_msi_msg(msi_desc, &msg);
}
static void msi_set_enable(struct pci_dev *dev, int enable)
case PHYSDEVOP_alloc_irq_vector: {
struct physdev_irq irq_op;
+ int vector;
ret = -EFAULT;
if ( copy_from_guest(&irq_op, arg, 1) != 0 )
irq = irq_op.irq;
ret = -EINVAL;
-
- irq_op.vector = assign_irq_vector(irq);
+
+ /* FIXME: once dom0 exceeds the GSI IRQ limit, the
+ limit here must be removed. */
+ BUG_ON(irq >= 256);
+
+ vector = assign_irq_vector(irq);
+ if (vector >= FIRST_DYNAMIC_VECTOR)
+ irq_op.vector = irq;
+ else
+ irq_op.vector = -ENOSPC;
spin_lock(&pcidevs_lock);
spin_lock(&dom0->event_lock);
init_apic_mappings();
- init_IRQ();
-
percpu_init_areas();
+ init_IRQ();
+
xsm_init(&initrdidx, mbi, initial_images_start);
init_idle_domain();
* send_IPI_mask(cpumask, vector): sends @vector IPI to CPUs in @cpumask,
* excluding the local CPU. @cpumask may be empty.
*/
-#define send_IPI_mask (genapic->send_IPI_mask)
+
+void send_IPI_mask(const cpumask_t *mask, int vector)
+{
+ genapic->send_IPI_mask(mask, vector);
+}
/*
* Some notes on x86 processor bugs affecting SMP operation:
cpu_relax();
}
+static void __default_send_IPI_shortcut(unsigned int shortcut, int vector,
+ unsigned int dest)
+{
+ unsigned int cfg;
+
+ /*
+ * Wait for idle.
+ */
+ apic_wait_icr_idle();
+
+ /*
+ * prepare target chip field
+ */
+ cfg = __prepare_ICR(shortcut, vector) | dest;
+ /*
+ * Send the IPI. The write to APIC_ICR fires this off.
+ */
+ apic_write_around(APIC_ICR, cfg);
+}
+
+void send_IPI_self_flat(int vector)
+{
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+}
+
+void send_IPI_self_phys(int vector)
+{
+ __default_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
+}
+
+void send_IPI_self_x2apic(int vector)
+{
+ apic_write(APIC_SELF_IPI, vector);
+}
+
void send_IPI_mask_flat(const cpumask_t *cpumask, int vector)
{
unsigned long mask = cpus_addr(*cpumask)[0];
fastcall void smp_event_check_interrupt(struct cpu_user_regs *regs)
{
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
ack_APIC_irq();
perfc_incr(ipis);
+ set_irq_regs(old_regs);
}
static void __smp_call_function_interrupt(void)
fastcall void smp_call_function_interrupt(struct cpu_user_regs *regs)
{
+ struct cpu_user_regs *old_regs = set_irq_regs(regs);
+
ack_APIC_irq();
perfc_incr(ipis);
__smp_call_function_interrupt();
+ set_irq_regs(old_regs);
}
set_cpu_sibling_map(raw_smp_processor_id());
wmb();
+ /* Initialize vector_irq for this CPU before marking it online. */
+ lock_vector_lock();
+ __setup_vector_irq(smp_processor_id());
cpu_set(smp_processor_id(), cpu_online_map);
+ unlock_vector_lock();
+
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
init_percpu_time();
cpu_clear(cpu, cpu_sibling_setup_map);
}
-extern void fixup_irqs(cpumask_t map);
+extern void fixup_irqs(void);
int __cpu_disable(void)
{
- cpumask_t map = cpu_online_map;
int cpu = smp_processor_id();
/*
remove_siblinginfo(cpu);
- cpu_clear(cpu, map);
- fixup_irqs(map);
+ cpu_clear(cpu, cpu_online_map);
+ fixup_irqs();
/* It's now safe to remove this processor from the online map */
cpu_clear(cpu, cpu_online_map);
void __init smp_intr_init(void)
{
- int irq, seridx;
+ int irq, seridx, cpu = smp_processor_id();
/*
* IRQ0 must be given a fixed assignment and initialized,
* because it's used before the IO-APIC is set up.
*/
irq_vector[0] = FIRST_HIPRIORITY_VECTOR;
- vector_irq[FIRST_HIPRIORITY_VECTOR] = 0;
/*
* Also ensure serial interrupts are high priority. We do not
for (seridx = 0; seridx < 2; seridx++) {
if ((irq = serial_irq(seridx)) < 0)
continue;
- irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
- vector_irq[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
+ irq_vector[irq] = FIRST_HIPRIORITY_VECTOR + seridx + 1;
+ per_cpu(vector_irq, cpu)[FIRST_HIPRIORITY_VECTOR + seridx + 1] = irq;
+ irq_cfg[irq].vector = FIRST_HIPRIORITY_VECTOR + seridx + 1;
+ irq_cfg[irq].domain = (cpumask_t)CPU_MASK_ALL;
}
+ /* IPI for cleaning up vectors after an IRQ move. */
+ set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
+
/* IPI for event checking. */
set_intr_gate(EVENT_CHECK_VECTOR, event_check_interrupt);
#include <asm/msi.h>
#include <asm/hvm/svm/amd-iommu-proto.h>
#include <asm-x86/fixmap.h>
+#include <mach_apic.h>
static struct amd_iommu **irq_to_iommu;
static int nr_amd_iommus;
return -EFAULT;
}
-static void amd_iommu_msi_data_init(struct amd_iommu *iommu)
+static void iommu_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
- u32 msi_data;
+ struct msi_msg msg;
+ unsigned int dest;
+ struct amd_iommu *iommu = irq_to_iommu[irq];
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_cfg *cfg = desc->chip_data;
u8 bus = (iommu->bdf >> 8) & 0xff;
u8 dev = PCI_SLOT(iommu->bdf & 0xff);
u8 func = PCI_FUNC(iommu->bdf & 0xff);
- int vector = irq_to_vector(iommu->irq);
-
- msi_data = MSI_DATA_TRIGGER_EDGE |
- MSI_DATA_LEVEL_ASSERT |
- MSI_DATA_DELIVERY_FIXED |
- MSI_DATA_VECTOR(vector);
-
- pci_conf_write32(bus, dev, func,
- iommu->msi_cap + PCI_MSI_DATA_64, msi_data);
-}
-static void amd_iommu_msi_addr_init(struct amd_iommu *iommu, int phy_cpu)
-{
-
- int bus = (iommu->bdf >> 8) & 0xff;
- int dev = PCI_SLOT(iommu->bdf & 0xff);
- int func = PCI_FUNC(iommu->bdf & 0xff);
+ dest = set_desc_affinity(desc, mask);
+ if (dest == BAD_APICID) {
+ gdprintk(XENLOG_ERR, "Failed to set IOMMU interrupt affinity!\n");
+ return;
+ }
- u32 address_hi = 0;
- u32 address_lo = MSI_ADDR_HEADER |
- MSI_ADDR_DESTMODE_PHYS |
- MSI_ADDR_REDIRECTION_CPU |
- MSI_ADDR_DEST_ID(phy_cpu);
+ memset(&msg, 0, sizeof(msg));
+ msg.data = MSI_DATA_VECTOR(cfg->vector) & 0xff;
+ msg.data |= 1 << 14;
+ msg.data |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED:
+ MSI_DATA_DELIVERY_LOWPRI;
+
+ msg.address_hi = 0;
+ msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
+ msg.address_lo |= INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC:
+ MSI_ADDR_DESTMODE_PHYS;
+ msg.address_lo |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU:
+ MSI_ADDR_REDIRECTION_LOWPRI;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest & 0xff);
pci_conf_write32(bus, dev, func,
- iommu->msi_cap + PCI_MSI_ADDRESS_LO, address_lo);
+ iommu->msi_cap + PCI_MSI_DATA_64, msg.data);
pci_conf_write32(bus, dev, func,
- iommu->msi_cap + PCI_MSI_ADDRESS_HI, address_hi);
+ iommu->msi_cap + PCI_MSI_ADDRESS_LO, msg.address_lo);
+ pci_conf_write32(bus, dev, func,
+ iommu->msi_cap + PCI_MSI_ADDRESS_HI, msg.address_hi);
+
}
static void amd_iommu_msi_enable(struct amd_iommu *iommu, int flag)
{
unsigned long flags;
struct amd_iommu *iommu = irq_to_iommu[irq];
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
/* FIXME: do not support mask bits at the moment */
if ( iommu->maskbit )
ack_APIC_irq();
}
-static void iommu_msi_set_affinity(unsigned int irq, cpumask_t dest)
-{
- struct amd_iommu *iommu = irq_to_iommu[irq];
- amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
-}
static struct hw_interrupt_type iommu_msi_type = {
.typename = "AMD_IOV_MSI",
gdprintk(XENLOG_ERR VTDPREFIX, "IOMMU: no irqs\n");
return 0;
}
-
+
irq_desc[irq].handler = &iommu_msi_type;
irq_to_iommu[irq] = iommu;
ret = request_irq(irq, amd_iommu_page_fault, 0,
register_iommu_event_log_in_mmio_space(iommu);
register_iommu_exclusion_range(iommu);
- amd_iommu_msi_data_init (iommu);
- amd_iommu_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
+ iommu_msi_set_affinity(iommu->irq, cpu_online_map);
amd_iommu_msi_enable(iommu, IOMMU_CONTROL_ENABLED);
set_iommu_command_buffer_control(iommu, IOMMU_CONTROL_ENABLED);
{
unsigned long flags;
struct iommu *iommu = irq_to_iommu[irq];
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ irq_complete_move(&desc);
/* mask it */
spin_lock_irqsave(&iommu->register_lock, flags);
ack_APIC_irq();
}
-static void dma_msi_data_init(struct iommu *iommu, int irq)
-{
- u32 msi_data = 0;
- unsigned long flags;
- int vector = irq_to_vector(irq);
-
- /* Fixed, edge, assert mode. Follow MSI setting */
- msi_data |= vector & 0xff;
- msi_data |= 1 << 14;
-
- spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writel(iommu->reg, DMAR_FEDATA_REG, msi_data);
- spin_unlock_irqrestore(&iommu->register_lock, flags);
-}
-
-static void dma_msi_addr_init(struct iommu *iommu, int phy_cpu)
+static void dma_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
- u64 msi_address;
+ struct msi_msg msg;
+ unsigned int dest;
unsigned long flags;
- /* Physical, dedicated cpu. Follow MSI setting */
- msi_address = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
- msi_address |= MSI_PHYSICAL_MODE << 2;
- msi_address |= MSI_REDIRECTION_HINT_MODE << 3;
- msi_address |= phy_cpu << MSI_TARGET_CPU_SHIFT;
+ struct iommu *iommu = irq_to_iommu[irq];
+ struct irq_desc *desc = irq_to_desc(irq);
+ struct irq_cfg *cfg = desc->chip_data;
spin_lock_irqsave(&iommu->register_lock, flags);
- dmar_writel(iommu->reg, DMAR_FEADDR_REG, (u32)msi_address);
- dmar_writel(iommu->reg, DMAR_FEUADDR_REG, (u32)(msi_address >> 32));
+ dest = set_desc_affinity(desc, mask);
+ if (dest == BAD_APICID) {
+ gdprintk(XENLOG_ERR VTDPREFIX, "Failed to set IOMMU interrupt affinity!\n");
+ /* Don't leak the register lock taken above. */
+ spin_unlock_irqrestore(&iommu->register_lock, flags);
+ return;
+ }
+
+ memset(&msg, 0, sizeof(msg));
+ msg.data = MSI_DATA_VECTOR(cfg->vector) & 0xff;
+ msg.data |= 1 << 14;
+ msg.data |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_DATA_DELIVERY_FIXED:
+ MSI_DATA_DELIVERY_LOWPRI;
+
+ /* Follow MSI setting */
+ if (x2apic_enabled)
+ msg.address_hi = dest & 0xFFFFFF00;
+ msg.address_lo = (MSI_ADDRESS_HEADER << (MSI_ADDRESS_HEADER_SHIFT + 8));
+ msg.address_lo |= INT_DEST_MODE ? MSI_ADDR_DESTMODE_LOGIC:
+ MSI_ADDR_DESTMODE_PHYS;
+ msg.address_lo |= (INT_DELIVERY_MODE != dest_LowestPrio) ?
+ MSI_ADDR_REDIRECTION_CPU:
+ MSI_ADDR_REDIRECTION_LOWPRI;
+ msg.address_lo |= MSI_ADDR_DEST_ID(dest & 0xff);
+
+ dmar_writel(iommu->reg, DMAR_FEDATA_REG, msg.data);
+ dmar_writel(iommu->reg, DMAR_FEADDR_REG, msg.address_lo);
+ dmar_writel(iommu->reg, DMAR_FEUADDR_REG, msg.address_hi);
spin_unlock_irqrestore(&iommu->register_lock, flags);
}
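Both IOMMU set_affinity paths compose the MSI address/data words by hand. The field layout they rely on, summarised as a sketch (standard x86 MSI encoding; not patch code):

/*
 * data[7:0]    vector            MSI_DATA_VECTOR(cfg->vector)
 * data[10:8]   delivery mode     fixed vs. lowest-priority
 * data[14]     level assert      the '1 << 14' above
 * addr[19:12]  destination ID    MSI_ADDR_DEST_ID(dest & 0xff)
 * addr[3]      redirection hint  CPU vs. lowest-priority
 * addr[2]      destination mode  physical vs. logical
 */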
-static void dma_msi_set_affinity(unsigned int irq, cpumask_t dest)
-{
- struct iommu *iommu = irq_to_iommu[irq];
- dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(dest)));
-}
-
static struct hw_interrupt_type dma_msi_type = {
.typename = "DMA_MSI",
.startup = dma_msi_startup,
int irq = -1;
int ret;
unsigned long flags;
+ struct irq_cfg *cfg;
for_each_drhd_unit ( drhd )
{
}
iommu->irq = irq;
}
- dma_msi_data_init(iommu, iommu->irq);
- dma_msi_addr_init(iommu, cpu_physical_id(first_cpu(cpu_online_map)));
+
+ cfg = irq_cfg(irq);
+ dma_msi_set_affinity(irq, cfg->domain);
+
clear_fault_bits(iommu);
spin_lock_irqsave(&iommu->register_lock, flags);
#define APIC_VERBOSE 1
#define APIC_DEBUG 2
+#define SET_APIC_LOGICAL_ID(x) (((x)<<24))
+
+#define IO_APIC_REDIR_VECTOR_MASK 0x000FF
+#define IO_APIC_REDIR_DEST_LOGICAL 0x00800
+#define IO_APIC_REDIR_DEST_PHYSICAL 0x00000
+
extern int apic_verbosity;
extern int x2apic_enabled;
#define APIC_TDCR 0x3E0
/* Only available in x2APIC mode */
-#define APIC_SELF_IPI 0x400
+#define APIC_SELF_IPI 0x3F0
#define APIC_TDR_DIV_TMBASE (1<<2)
#define APIC_TDR_DIV_1 0xB
void (*init_apic_ldr)(void);
void (*clustered_apic_check)(void);
cpumask_t (*target_cpus)(void);
+ cpumask_t (*vector_allocation_domain)(int cpu);
unsigned int (*cpu_mask_to_apicid)(cpumask_t cpumask);
void (*send_IPI_mask)(const cpumask_t *mask, int vector);
+ void (*send_IPI_self)(int vector);
};
#define APICFUNC(x) .x = x
cpumask_t target_cpus_flat(void);
unsigned int cpu_mask_to_apicid_flat(cpumask_t cpumask);
void send_IPI_mask_flat(const cpumask_t *mask, int vector);
+void send_IPI_self_flat(int vector);
+cpumask_t vector_allocation_domain_flat(int cpu);
#define GENAPIC_FLAT \
.int_delivery_mode = dest_LowestPrio, \
.int_dest_mode = 1 /* logical delivery */, \
.init_apic_ldr = init_apic_ldr_flat, \
.clustered_apic_check = clustered_apic_check_flat, \
.target_cpus = target_cpus_flat, \
+ .vector_allocation_domain = vector_allocation_domain_flat, \
.cpu_mask_to_apicid = cpu_mask_to_apicid_flat, \
- .send_IPI_mask = send_IPI_mask_flat
+ .send_IPI_mask = send_IPI_mask_flat, \
+ .send_IPI_self = send_IPI_self_flat
void init_apic_ldr_x2apic(void);
void clustered_apic_check_x2apic(void);
cpumask_t target_cpus_x2apic(void);
unsigned int cpu_mask_to_apicid_x2apic(cpumask_t cpumask);
void send_IPI_mask_x2apic(const cpumask_t *mask, int vector);
+void send_IPI_self_x2apic(int vector);
+cpumask_t vector_allocation_domain_x2apic(int cpu);
#define GENAPIC_X2APIC \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
.init_apic_ldr = init_apic_ldr_x2apic, \
.clustered_apic_check = clustered_apic_check_x2apic, \
.target_cpus = target_cpus_x2apic, \
+ .vector_allocation_domain = vector_allocation_domain_x2apic, \
.cpu_mask_to_apicid = cpu_mask_to_apicid_x2apic, \
- .send_IPI_mask = send_IPI_mask_x2apic
+ .send_IPI_mask = send_IPI_mask_x2apic, \
+ .send_IPI_self = send_IPI_self_x2apic
void init_apic_ldr_phys(void);
void clustered_apic_check_phys(void);
cpumask_t target_cpus_phys(void);
unsigned int cpu_mask_to_apicid_phys(cpumask_t cpumask);
void send_IPI_mask_phys(const cpumask_t *mask, int vector);
+void send_IPI_self_phys(int vector);
+cpumask_t vector_allocation_domain_phys(int cpu);
#define GENAPIC_PHYS \
.int_delivery_mode = dest_Fixed, \
.int_dest_mode = 0 /* physical delivery */, \
.init_apic_ldr = init_apic_ldr_phys, \
.clustered_apic_check = clustered_apic_check_phys, \
.target_cpus = target_cpus_phys, \
+ .vector_allocation_domain = vector_allocation_domain_phys, \
.cpu_mask_to_apicid = cpu_mask_to_apicid_phys, \
- .send_IPI_mask = send_IPI_mask_phys
+ .send_IPI_mask = send_IPI_mask_phys, \
+ .send_IPI_self = send_IPI_self_phys
#endif
#include <xen/config.h>
#include <asm/atomic.h>
+#include <xen/cpumask.h>
+#include <xen/smp.h>
#include <irq_vectors.h>
+#include <asm/percpu.h>
#define IO_APIC_IRQ(irq) (((irq) >= 16 && (irq) < nr_irqs_gsi) \
|| (((irq) < 16) && (1<<(irq)) & io_apic_irqs))
#define MAX_GSI_IRQS PAGE_SIZE * 8
#define MAX_NR_IRQS (2 * MAX_GSI_IRQS)
-extern int vector_irq[NR_VECTORS];
+#define irq_cfg(irq) &irq_cfg[(irq)]
+
+struct irq_cfg {
+ int vector; /* currently assigned vector */
+ cpumask_t domain; /* CPUs this vector is claimed on */
+ cpumask_t old_domain; /* previous domain during a move */
+ unsigned move_cleanup_count; /* old-domain CPUs not yet cleaned up */
+ u8 move_in_progress : 1; /* set until cleanup IPIs are sent */
+};
+
+extern struct irq_cfg *irq_cfg;
+
+typedef int vector_irq_t[NR_VECTORS];
+DECLARE_PER_CPU(vector_irq_t, vector_irq);
+
extern u8 *irq_vector;
-extern int irq_to_vector(int irq);
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack
+ */
+DECLARE_PER_CPU(struct cpu_user_regs *, __irq_regs);
+
+static inline struct cpu_user_regs *get_irq_regs(void)
+{
+ return __get_cpu_var(__irq_regs);
+}
+
+static inline struct cpu_user_regs *set_irq_regs(struct cpu_user_regs *new_regs)
+{
+ struct cpu_user_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+
+ old_regs = *pp_regs;
+ *pp_regs = new_regs;
+ return old_regs;
+}
+
+
#define platform_legacy_irq(irq) ((irq) < 16)
fastcall void event_check_interrupt(void);
fastcall void spurious_interrupt(void);
fastcall void thermal_interrupt(void);
fastcall void cmci_interrupt(void);
+fastcall void irq_move_cleanup_interrupt(void);
void disable_8259A_irq(unsigned int irq);
void enable_8259A_irq(unsigned int irq);
int init_irq_data(void);
void clear_irq_vector(int irq);
-int __assign_irq_vector(int irq);
+int irq_to_vector(int irq);
int create_irq(void);
void destroy_irq(unsigned int irq);
+struct irq_desc;
+extern void irq_complete_move(struct irq_desc **descp);
+
+void lock_vector_lock(void);
+void unlock_vector_lock(void);
+
+void __setup_vector_irq(int cpu);
+
+void move_native_irq(int irq);
+
+int __assign_irq_vector(int irq, struct irq_cfg *cfg, cpumask_t mask);
+
+int bind_irq_vector(int irq, int vector, cpumask_t domain);
+
#define domain_pirq_to_irq(d, pirq) ((d)->arch.pirq_irq[pirq])
#define domain_irq_to_pirq(d, irq) ((d)->arch.irq_pirq[irq])
/* Dynamically-allocated vectors available to any driver. */
#define FIRST_DYNAMIC_VECTOR 0x20
#define LAST_DYNAMIC_VECTOR 0xdf
+#define NR_DYNAMIC_VECTORS (LAST_DYNAMIC_VECTOR - FIRST_DYNAMIC_VECTOR + 1)
+
+#define IRQ_MOVE_CLEANUP_VECTOR FIRST_DYNAMIC_VECTOR
#define NR_VECTORS 256
#define init_apic_ldr (genapic->init_apic_ldr)
#define clustered_apic_check (genapic->clustered_apic_check)
#define cpu_mask_to_apicid (genapic->cpu_mask_to_apicid)
+#define vector_allocation_domain(cpu) (genapic->vector_allocation_domain(cpu))
static inline void enable_apic_mode(void)
{
void smp_send_nmi_allbutself(void);
+void send_IPI_mask(const cpumask_t *mask, int vector);
+
extern void (*mtrr_hook) (void);
#ifdef CONFIG_X86_64
#include <xen/bitmap.h>
#include <xen/kernel.h>
-typedef struct { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
+typedef struct cpumask { DECLARE_BITMAP(bits, NR_CPUS); } cpumask_t;
#define cpu_set(cpu, dst) __cpu_set((cpu), &(dst))
static inline void __cpu_set(int cpu, volatile cpumask_t *dstp)
return test_and_set_bit(cpu, addr->bits);
}
+/**
+ * cpumask_test_cpu - test for a cpu in a cpumask
+ */
+#define cpumask_test_cpu(cpu, cpumask) __cpu_test((cpu), &(cpumask))
+
+static inline int __cpu_test(int cpu, cpumask_t *addr)
+{
+ return test_bit(cpu, addr->bits);
+}
+
#define cpu_test_and_clear(cpu, cpumask) __cpu_test_and_clear((cpu), &(cpumask))
static inline int __cpu_test_and_clear(int cpu, cpumask_t *addr)
{
return bitmap_weight(srcp->bits, nbits);
}
+#define cpus_copy(dest, src) __cpus_copy(&(dest), &(src))
+static inline void __cpus_copy(cpumask_t *dstp, cpumask_t *srcp)
+{
+ bitmap_copy(dstp->bits, srcp->bits, NR_CPUS);
+}
+
#define cpus_shift_right(dst, src, n) \
__cpus_shift_right(&(dst), &(src), (n), NR_CPUS)
static inline void __cpus_shift_right(cpumask_t *dstp,
struct msi_desc *msi_desc;
struct irqaction *action; /* IRQ action list */
unsigned int depth; /* nested irq disables */
+#if defined(__i386__) || defined(__x86_64__)
+ struct irq_cfg *chip_data;
+#endif
int irq;
spinlock_t lock;
cpumask_t affinity;
} __cacheline_aligned irq_desc_t;
-#ifndef CONFIG_X86
+#if defined(__ia64__)
extern irq_desc_t irq_desc[NR_VECTORS];
#define setup_irq(irq, action) \
static inline void set_irq_info(int irq, cpumask_t mask)
{
-#ifdef CONFIG_X86
+#if defined(__i386__) || defined(__x86_64__)
set_native_irq_info(irq, mask);
#else
set_native_irq_info(irq_to_vector(irq), mask);
#endif
}
+unsigned int set_desc_affinity(struct irq_desc *desc, cpumask_t mask);
+
#endif /* __XEN_IRQ_H__ */